+++ /dev/null
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <bl31.h>
-#include <cpu_data.h>
-#include <platform.h>
-
-/*******************************************************************************
- * This duplicates what the primary cpu did after a cold boot in BL1. The same
- * needs to be done when a cpu is hotplugged in. This function could also over-
- * ride any EL3 setup done by BL1 as this code resides in rw memory.
- ******************************************************************************/
-void bl31_arch_setup(void)
-{
- /* Program the counter frequency */
- write_cntfrq_el0(plat_get_syscnt_freq2());
-
- /* Initialize the cpu_ops pointer. */
- init_cpu_ops();
-}
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
+#include <xlat_tables.h>
.globl bl31_entrypoint
-
+ .globl bl31_warm_entrypoint
/* -----------------------------------------------------
* bl31_entrypoint() is the cold boot entrypoint,
b el3_exit
endfunc bl31_entrypoint
+
+ /* --------------------------------------------------------------------
+ * This CPU has been physically powered up. It is either resuming from
+ * suspend or has simply been turned on. In both cases, call the BL31
+ * warm boot entrypoint.
+ * --------------------------------------------------------------------
+ */
+func bl31_warm_entrypoint
+ /*
+ * On the warm boot path, most of the EL3 initialisations performed by
+ * 'el3_entrypoint_common' must be skipped:
+ *
+ * - Only when the platform bypasses the BL1/BL31 entrypoint by
+ * programming the reset address do we need to set the CPU endianness.
+ * In other cases, we assume this has been taken care of by the
+ * entrypoint code.
+ *
+ * - No need to determine the type of boot, we know it is a warm boot.
+ *
+ * - Do not try to distinguish between primary and secondary CPUs, this
+ * notion only exists for a cold boot.
+ *
+ * - No need to initialise the memory or the C runtime environment,
+ * it has been done once and for all on the cold boot path.
+ */
+ el3_entrypoint_common \
+ _set_endian=PROGRAMMABLE_RESET_ADDRESS \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=0 \
+ _exception_vectors=runtime_exceptions
+
+ /* --------------------------------------------
+ * Enable the MMU with the DCache disabled. It
+ * is safe to use stacks allocated in normal
+ * memory as a result. All memory accesses are
+ * marked nGnRnE when the MMU is disabled. So
+ * all the stack writes will make it to memory.
+ * All memory accesses are marked Non-cacheable
+ * when the MMU is enabled but D$ is disabled.
+ * So used stack memory is guaranteed to be
+ * visible immediately after the MMU is enabled.
+ * Enabling the DCache at the same time as the
+ * MMU can lead to speculatively fetched and
+ * possibly stale stack memory being read from
+ * other caches. This can lead to coherency
+ * issues.
+ * --------------------------------------------
+ */
+ mov x0, #DISABLE_DCACHE
+ bl bl31_plat_enable_mmu
+
+ bl psci_warmboot_entrypoint
+
+ b el3_exit
+endfunc bl31_warm_entrypoint
BL31_SOURCES += bl31/bl31_main.c \
bl31/interrupt_mgmt.c \
- bl31/aarch64/bl31_arch_setup.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/runtime_exceptions.S \
bl31/aarch64/crash_reporting.S \
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
void bl31_lib_init(void)
{
cm_init();
+
+ /*
+ * Initialize the PSCI library here. This also does EL3 architectural
+ * setup.
+ */
+ psci_setup((uintptr_t)bl31_warm_entrypoint);
}
/*******************************************************************************
NOTICE("BL31: %s\n", version_string);
NOTICE("BL31: %s\n", build_message);
- /* Perform remaining generic architectural setup from EL3 */
- bl31_arch_setup();
-
/* Perform platform setup in BL31 */
bl31_platform_setup();
is also used for diagnostic purposes
* `_start` and `_end` values must be based on the `OEN_*` values defined in
- [`smcc_helpers.h`]
+ [`smcc.h`]
* `_type` must be one of `SMC_TYPE_FAST` or `SMC_TYPE_STD`
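+
+For example, a service owning the SiP range could be declared as in the
+sketch below (`sip_svc_setup` and `sip_svc_smc_handler` are illustrative
+names for functions the service would implement):
+
+    DECLARE_RT_SVC(
+        sip_svc,
+        OEN_SIP_START,
+        OEN_SIP_END,
+        SMC_TYPE_FAST,
+        sip_svc_setup,
+        sip_svc_smc_handler
+    );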
SMC_RET3(handle, x0, x1, x2);
SMC_RET4(handle, x0, x1, x2, x3);
-The `reserved` parameter to the handler is reserved for future use and can be
-ignored. The value returned by a SMC handler is also reserved for future use -
-completion of the handler function must always be via one of the `SMC_RETn()`
-macros.
+The `cookie` parameter to the handler is reserved for future use and can be
+ignored. The `handle` is returned by the SMC handler - completion of the
+handler function must always be via one of the `SMC_RETn()` macros.
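+
+As a sketch, a handler obeying these rules might look like the following
+(service-specific names such as `MY_SVC_FID` are hypothetical):
+
+    uintptr_t my_svc_smc_handler(uint32_t smc_fid,
+                                 u_register_t x1,
+                                 u_register_t x2,
+                                 u_register_t x3,
+                                 u_register_t x4,
+                                 void *cookie,   /* reserved, ignored */
+                                 void *handle,
+                                 u_register_t flags)
+    {
+        /* Reject function IDs this service does not recognise */
+        if (smc_fid != MY_SVC_FID)
+            SMC_RET1(handle, SMC_UNK);
+
+        /* ... service the call, then complete via an SMC_RETn() macro */
+        SMC_RET1(handle, 0);
+    }
+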
NOTE: The PSCI and Test Secure-EL1 Payload Dispatcher services do not follow
all of the above requirements yet.
[`lib/psci`]: ../lib/psci
[`std_svc_setup.c`]: ../services/std_svc/std_svc_setup.c
[`runtime_svc.h`]: ../include/common/runtime_svc.h
-[`smcc_helpers.h`]: ../include/common/smcc_helpers.h
+[`smcc.h`]: ../include/lib/smcc.h
[PSCI]: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf "Power State Coordination Interface PDD (ARM DEN 0022C)"
[SMCCC]: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
/*******************************************************************************
* Function prototypes
******************************************************************************/
-void bl31_arch_setup(void);
void bl31_next_el_arch_setup(uint32_t security_state);
void bl31_set_next_image_type(uint32_t type);
uint32_t bl31_get_next_image_type(void);
void bl31_prepare_next_image_entry(void);
void bl31_register_bl32_init(int32_t (*)(void));
+void bl31_warm_entrypoint(void);
#endif /* __BL31_H__ */
+++ /dev/null
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __SMCC_HELPERS_H__
-#define __SMCC_HELPERS_H__
-
-/*******************************************************************************
- * Bit definitions inside the function id as per the SMC calling convention
- ******************************************************************************/
-#define FUNCID_TYPE_SHIFT 31
-#define FUNCID_CC_SHIFT 30
-#define FUNCID_OEN_SHIFT 24
-#define FUNCID_NUM_SHIFT 0
-
-#define FUNCID_TYPE_MASK 0x1
-#define FUNCID_CC_MASK 0x1
-#define FUNCID_OEN_MASK 0x3f
-#define FUNCID_NUM_MASK 0xffff
-
-#define FUNCID_TYPE_WIDTH 1
-#define FUNCID_CC_WIDTH 1
-#define FUNCID_OEN_WIDTH 6
-#define FUNCID_NUM_WIDTH 16
-
-#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \
- FUNCID_CC_MASK)
-#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \
- FUNCID_TYPE_MASK)
-
-#define SMC_64 1
-#define SMC_32 0
-#define SMC_UNK 0xffffffff
-#define SMC_TYPE_FAST 1
-#define SMC_TYPE_STD 0
-#define SMC_PREEMPTED 0xfffffffe
-/*******************************************************************************
- * Owning entity number definitions inside the function id as per the SMC
- * calling convention
- ******************************************************************************/
-#define OEN_ARM_START 0
-#define OEN_ARM_END 0
-#define OEN_CPU_START 1
-#define OEN_CPU_END 1
-#define OEN_SIP_START 2
-#define OEN_SIP_END 2
-#define OEN_OEM_START 3
-#define OEN_OEM_END 3
-#define OEN_STD_START 4 /* Standard Calls */
-#define OEN_STD_END 4
-#define OEN_TAP_START 48 /* Trusted Applications */
-#define OEN_TAP_END 49
-#define OEN_TOS_START 50 /* Trusted OS */
-#define OEN_TOS_END 63
-#define OEN_LIMIT 64
-
-#ifndef __ASSEMBLY__
-
-#include <cassert.h>
-#include <context.h>
-#include <stdint.h>
-
-/* Various flags passed to SMC handlers */
-#define SMC_FROM_SECURE (0 << 0)
-#define SMC_FROM_NON_SECURE (1 << 0)
-
-#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE))
-#define is_caller_secure(_f) (!(is_caller_non_secure(_f)))
-
-/* Convenience macros to return from SMC handler */
-#define SMC_RET0(_h) { \
- return (uint64_t) (_h); \
-}
-#define SMC_RET1(_h, _x0) { \
- write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
- SMC_RET0(_h); \
-}
-#define SMC_RET2(_h, _x0, _x1) { \
- write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
- SMC_RET1(_h, (_x0)); \
-}
-#define SMC_RET3(_h, _x0, _x1, _x2) { \
- write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
- SMC_RET2(_h, (_x0), (_x1)); \
-}
-#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
- write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
- SMC_RET3(_h, (_x0), (_x1), (_x2)); \
-}
-
-/*
- * Convenience macros to access general purpose registers using handle provided
- * to SMC handler. These takes the offset values defined in context.h
- */
-#define SMC_GET_GP(_h, _g) \
- read_ctx_reg(get_gpregs_ctx(_h), (_g))
-#define SMC_SET_GP(_h, _g, _v) \
- write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v))
-
-/*
- * Convenience macros to access EL3 context registers using handle provided to
- * SMC handler. These takes the offset values defined in context.h
- */
-#define SMC_GET_EL3(_h, _e) \
- read_ctx_reg(get_el3state_ctx(_h), (_e))
-#define SMC_SET_EL3(_h, _e, _v) \
- write_ctx_reg(get_el3state_ctx(_h), (_e), (_v))
-
-/* The macro below is used to identify a Standard Service SMC call */
-#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \
- FUNCID_OEN_MASK) == OEN_STD_START)
-
-/* The macro below is used to identify a valid Fast SMC call */
-#define is_valid_fast_smc(_fid) ((!(((_fid) >> 16) & 0xff)) && \
- (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
-
-/*
- * Macro to define UUID for services. Apart from defining and initializing a
- * uuid_t structure, this macro verifies that the first word of the defined UUID
- * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
- * returned UUID in x0 for an invalid SMC error return
- */
-#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
- _n0, _n1, _n2, _n3, _n4, _n5) \
- CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
- static const uuid_t _name = { \
- _tl, _tm, _th, _cl, _ch, \
- { _n0, _n1, _n2, _n3, _n4, _n5 } \
- }
-
-/* Return a UUID in the SMC return registers */
-#define SMC_UUID_RET(_h, _uuid) \
- SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
- ((const uint32_t *) &(_uuid))[1], \
- ((const uint32_t *) &(_uuid))[2], \
- ((const uint32_t *) &(_uuid))[3])
-
-#endif /*__ASSEMBLY__*/
-#endif /* __SMCC_HELPERS_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SMCC_HELPERS_H__
+#define __SMCC_HELPERS_H__
+
+#include <smcc.h>
+
+#ifndef __ASSEMBLY__
+#include <context.h>
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h) { \
+ return (uint64_t) (_h); \
+}
+#define SMC_RET1(_h, _x0) { \
+ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
+ SMC_RET0(_h); \
+}
+#define SMC_RET2(_h, _x0, _x1) { \
+ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
+ SMC_RET1(_h, (_x0)); \
+}
+#define SMC_RET3(_h, _x0, _x1, _x2) { \
+ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
+ SMC_RET2(_h, (_x0), (_x1)); \
+}
+#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
+ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
+ SMC_RET3(_h, (_x0), (_x1), (_x2)); \
+}
+
+/*
+ * Convenience macros to access general purpose registers using handle provided
+ * to SMC handler. These take the offset values defined in context.h
+ */
+#define SMC_GET_GP(_h, _g) \
+ read_ctx_reg(get_gpregs_ctx(_h), (_g))
+#define SMC_SET_GP(_h, _g, _v) \
+ write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v))
+
+/*
+ * Convenience macros to access EL3 context registers using handle provided to
+ * SMC handler. These take the offset values defined in context.h
+ */
+#define SMC_GET_EL3(_h, _e) \
+ read_ctx_reg(get_el3state_ctx(_h), (_e))
+#define SMC_SET_EL3(_h, _e, _v) \
+ write_ctx_reg(get_el3state_ctx(_h), (_e), (_v))
+
+/* Return a UUID in the SMC return registers */
+#define SMC_UUID_RET(_h, _uuid) \
+	SMC_RET4((_h), ((const uint32_t *) &(_uuid))[0], \
+ ((const uint32_t *) &(_uuid))[1], \
+ ((const uint32_t *) &(_uuid))[2], \
+ ((const uint32_t *) &(_uuid))[3])
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_HELPERS_H__ */
#define PSCI_NUM_CALLS 18
#endif
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK 0xffe0u
+#define PSCI_FID_VALUE 0u
+#define is_psci_fid(_fid) \
+ (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
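+
+/*
+ * Usage sketch: a top-level dispatcher (e.g. the Standard Service handler)
+ * can use `if (is_psci_fid(smc_fid))` to route PSCI calls to
+ * psci_smc_handler() before handling its other function IDs.
+ */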
+
/*******************************************************************************
* PSCI Migrate and friends
******************************************************************************/
long psci_migrate_info_up_cpu(void);
int psci_features(unsigned int psci_fid);
void __dead2 psci_power_down_wfi(void);
-void psci_entrypoint(void);
-void psci_register_spd_pm_hook(const spd_pm_ops_t *);
-uintptr_t psci_smc_handler(uint32_t smc_fid,
+void psci_arch_setup(void);
+
+/*
+ * The API below is deprecated. It is replaced by bl31_warm_entrypoint in
+ * AArch64.
+ */
+void psci_entrypoint(void) __deprecated;
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/******************************************************************************
+ * PSCI Library Interfaces
+ *****************************************************************************/
+u_register_t psci_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
void *cookie,
void *handle,
u_register_t flags);
-
-/* PSCI setup function */
-int psci_setup(void);
+int psci_setup(uintptr_t mailbox_ep);
+void psci_warmboot_entrypoint(void);
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
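+
+/*
+ * Typical call flow (sketch): the runtime firmware (e.g. BL31) calls
+ * psci_setup() once on the cold boot path, passing its warm boot
+ * entrypoint as `mailbox_ep`; each powered-on CPU then enters
+ * psci_warmboot_entrypoint() on resume, and PSCI SMCs are routed to
+ * psci_smc_handler().
+ */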
#endif /*__ASSEMBLY__*/
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SMCC_H__
+#define __SMCC_H__
+
+/*******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT 31
+#define FUNCID_CC_SHIFT 30
+#define FUNCID_OEN_SHIFT 24
+#define FUNCID_NUM_SHIFT 0
+
+#define FUNCID_TYPE_MASK 0x1
+#define FUNCID_CC_MASK 0x1
+#define FUNCID_OEN_MASK 0x3f
+#define FUNCID_NUM_MASK 0xffff
+
+#define FUNCID_TYPE_WIDTH 1
+#define FUNCID_CC_WIDTH 1
+#define FUNCID_OEN_WIDTH 6
+#define FUNCID_NUM_WIDTH 16
+
+#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \
+ FUNCID_CC_MASK)
+#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \
+ FUNCID_TYPE_MASK)
+
+#define SMC_64 1
+#define SMC_32 0
+#define SMC_UNK 0xffffffff
+#define SMC_TYPE_FAST 1
+#define SMC_TYPE_STD 0
+#define SMC_PREEMPTED 0xfffffffe
+/*******************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ ******************************************************************************/
+#define OEN_ARM_START 0
+#define OEN_ARM_END 0
+#define OEN_CPU_START 1
+#define OEN_CPU_END 1
+#define OEN_SIP_START 2
+#define OEN_SIP_END 2
+#define OEN_OEM_START 3
+#define OEN_OEM_END 3
+#define OEN_STD_START 4 /* Standard Calls */
+#define OEN_STD_END 4
+#define OEN_TAP_START 48 /* Trusted Applications */
+#define OEN_TAP_END 49
+#define OEN_TOS_START 50 /* Trusted OS */
+#define OEN_TOS_END 63
+#define OEN_LIMIT 64
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <stdint.h>
+
+/* Various flags passed to SMC handlers */
+#define SMC_FROM_SECURE (0 << 0)
+#define SMC_FROM_NON_SECURE (1 << 0)
+
+#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE))
+#define is_caller_secure(_f) (!(is_caller_non_secure(_f)))
+
+/* The macro below is used to identify a Standard Service SMC call */
+#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \
+ FUNCID_OEN_MASK) == OEN_STD_START)
+
+/* The macro below is used to identify a valid Fast SMC call */
+#define is_valid_fast_smc(_fid) ((!(((_fid) >> 16) & 0xff)) && \
+ (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
+
+/*
+ * Macro to define UUID for services. Apart from defining and initializing a
+ * uuid_t structure, this macro verifies that the first word of the defined UUID
+ * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
+ * returned UUID in x0 for an invalid SMC error return
+ */
+#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
+ _n0, _n1, _n2, _n3, _n4, _n5) \
+ CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
+ static const uuid_t _name = { \
+ _tl, _tm, _th, _cl, _ch, \
+ { _n0, _n1, _n2, _n3, _n4, _n5 } \
+ }
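+
+/*
+ * Usage sketch (values illustrative):
+ *
+ * DEFINE_SVC_UUID(my_svc_uid,
+ *     0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
+ *     0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2);
+ */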
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_H__ */
#define STD_SVC_VERSION_MAJOR 0x0
#define STD_SVC_VERSION_MINOR 0x1
-/* The macros below are used to identify PSCI calls from the SMC function ID */
-#define PSCI_FID_MASK 0xffe0u
-#define PSCI_FID_VALUE 0u
-#define is_psci_fid(_fid) \
- (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
-
#endif /* __STD_SVC_H__ */
+++ /dev/null
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <el3_common_macros.S>
-#include <psci.h>
-#include <xlat_tables.h>
-
- .globl psci_entrypoint
- .globl psci_power_down_wfi
-
- /* --------------------------------------------------------------------
- * This CPU has been physically powered up. It is either resuming from
- * suspend or has simply been turned on. In both cases, call the power
- * on finisher.
- * --------------------------------------------------------------------
- */
-func psci_entrypoint
- /*
- * On the warm boot path, most of the EL3 initialisations performed by
- * 'el3_entrypoint_common' must be skipped:
- *
- * - Only when the platform bypasses the BL1/BL31 entrypoint by
- * programming the reset address do we need to set the CPU endianness.
- * In other cases, we assume this has been taken care by the
- * entrypoint code.
- *
- * - No need to determine the type of boot, we know it is a warm boot.
- *
- * - Do not try to distinguish between primary and secondary CPUs, this
- * notion only exists for a cold boot.
- *
- * - No need to initialise the memory or the C runtime environment,
- * it has been done once and for all on the cold boot path.
- */
- el3_entrypoint_common \
- _set_endian=PROGRAMMABLE_RESET_ADDRESS \
- _warm_boot_mailbox=0 \
- _secondary_cold_boot=0 \
- _init_memory=0 \
- _init_c_runtime=0 \
- _exception_vectors=runtime_exceptions
-
- /* --------------------------------------------
- * Enable the MMU with the DCache disabled. It
- * is safe to use stacks allocated in normal
- * memory as a result. All memory accesses are
- * marked nGnRnE when the MMU is disabled. So
- * all the stack writes will make it to memory.
- * All memory accesses are marked Non-cacheable
- * when the MMU is enabled but D$ is disabled.
- * So used stack memory is guaranteed to be
- * visible immediately after the MMU is enabled
- * Enabling the DCache at the same time as the
- * MMU can lead to speculatively fetched and
- * possibly stale stack memory being read from
- * other caches. This can lead to coherency
- * issues.
- * --------------------------------------------
- */
- mov x0, #DISABLE_DCACHE
- bl bl31_plat_enable_mmu
-
- bl psci_power_up_finish
-
- b el3_exit
-endfunc psci_entrypoint
-
- /* --------------------------------------------
- * This function is called to indicate to the
- * power controller that it is safe to power
- * down this cpu. It should not exit the wfi
- * and will be released from reset upon power
- * up. 'wfi_spill' is used to catch erroneous
- * exits from wfi.
- * --------------------------------------------
- */
-func psci_power_down_wfi
- dsb sy // ensure write buffer empty
- wfi
- bl plat_panic_handler
-endfunc psci_power_down_wfi
-
.globl psci_do_pwrdown_cache_maintenance
.globl psci_do_pwrup_cache_maintenance
+ .globl psci_power_down_wfi
+#if !ERROR_DEPRECATED
+ .globl psci_entrypoint
+#endif
/* -----------------------------------------------------------------------
* void psci_do_pwrdown_cache_maintenance(unsigned int power level);
ldp x29, x30, [sp], #16
ret
endfunc psci_do_pwrup_cache_maintenance
+
+/* -----------------------------------------------------------------------
+ * void psci_power_down_wfi(void);
+ * This function is called to indicate to the power controller that it
+ * is safe to power down this CPU. The CPU should not exit the wfi and
+ * will be released from reset upon power up; an erroneous exit from
+ * wfi is caught by plat_panic_handler().
+ * -----------------------------------------------------------------------
+ */
+func psci_power_down_wfi
+ dsb sy // ensure write buffer empty
+ wfi
+ bl plat_panic_handler
+endfunc psci_power_down_wfi
+
+/* -----------------------------------------------------------------------
+ * void psci_entrypoint(void);
+ * The deprecated PSCI warm boot entry point for AArch64. It simply
+ * branches to bl31_warm_entrypoint().
+ * -----------------------------------------------------------------------
+ */
+func_deprecated psci_entrypoint
+ b bl31_warm_entrypoint
+endfunc_deprecated psci_entrypoint
* code to enable the gic cpu interface and for a cluster it will enable
* coherency at the interconnect level in addition to gic cpu interface.
******************************************************************************/
-void psci_power_up_finish(void)
+void psci_warmboot_entrypoint(void)
{
unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
# POSSIBILITY OF SUCH DAMAGE.
#
-PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
- lib/el3_runtime/aarch64/context.S \
- lib/el3_runtime/aarch64/cpu_data.S \
- lib/el3_runtime/aarch64/context_mgmt.c \
- lib/cpus/aarch64/cpu_helpers.S \
- lib/locks/exclusive/spinlock.S \
- lib/psci/psci_off.c \
- lib/psci/psci_on.c \
- lib/psci/psci_suspend.c \
- lib/psci/psci_common.c \
- lib/psci/psci_main.c \
- lib/psci/psci_setup.c \
- lib/psci/psci_system_off.c \
- lib/psci/aarch64/psci_entry.S \
- lib/psci/aarch64/psci_helpers.S \
+PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
+ lib/el3_runtime/aarch64/context.S \
+ lib/el3_runtime/aarch64/cpu_data.S \
+ lib/el3_runtime/aarch64/context_mgmt.c \
+ lib/cpus/aarch64/cpu_helpers.S \
+ lib/locks/exclusive/spinlock.S \
+ lib/psci/psci_off.c \
+ lib/psci/psci_on.c \
+ lib/psci/psci_suspend.c \
+ lib/psci/psci_common.c \
+ lib/psci/psci_main.c \
+ lib/psci/psci_setup.c \
+ lib/psci/psci_system_off.c \
+ lib/psci/aarch64/psci_helpers.S
ifeq (${USE_COHERENT_MEM}, 1)
PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c
#include <assert.h>
#include <debug.h>
#include <platform.h>
-#include <runtime_svc.h>
-#include <std_svc.h>
+#include <smcc.h>
#include <string.h>
#include "psci_private.h"
/*******************************************************************************
* PSCI top level handler for servicing SMCs.
******************************************************************************/
-uintptr_t psci_smc_handler(uint32_t smc_fid,
+u_register_t psci_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t flags)
{
if (is_caller_secure(flags))
- SMC_RET1(handle, SMC_UNK);
+ return SMC_UNK;
/* Check the fid against the capabilities */
if (!(psci_caps & define_psci_cap(smc_fid)))
- SMC_RET1(handle, SMC_UNK);
+ return SMC_UNK;
if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
/* 32-bit PSCI function, clear top parameter bits */
switch (smc_fid) {
case PSCI_VERSION:
- SMC_RET1(handle, psci_version());
+ return psci_version();
case PSCI_CPU_OFF:
- SMC_RET1(handle, psci_cpu_off());
+ return psci_cpu_off();
case PSCI_CPU_SUSPEND_AARCH32:
- SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
+ return psci_cpu_suspend(x1, x2, x3);
case PSCI_CPU_ON_AARCH32:
- SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+ return psci_cpu_on(x1, x2, x3);
case PSCI_AFFINITY_INFO_AARCH32:
- SMC_RET1(handle, psci_affinity_info(x1, x2));
+ return psci_affinity_info(x1, x2);
case PSCI_MIG_AARCH32:
- SMC_RET1(handle, psci_migrate(x1));
+ return psci_migrate(x1);
case PSCI_MIG_INFO_TYPE:
- SMC_RET1(handle, psci_migrate_info_type());
+ return psci_migrate_info_type();
case PSCI_MIG_INFO_UP_CPU_AARCH32:
- SMC_RET1(handle, psci_migrate_info_up_cpu());
+ return psci_migrate_info_up_cpu();
case PSCI_SYSTEM_SUSPEND_AARCH32:
- SMC_RET1(handle, psci_system_suspend(x1, x2));
+ return psci_system_suspend(x1, x2);
case PSCI_SYSTEM_OFF:
psci_system_off();
/* We should never return from psci_system_reset() */
case PSCI_FEATURES:
- SMC_RET1(handle, psci_features(x1));
+ return psci_features(x1);
#if ENABLE_PSCI_STAT
case PSCI_STAT_RESIDENCY_AARCH32:
- SMC_RET1(handle, psci_stat_residency(x1, x2));
+ return psci_stat_residency(x1, x2);
case PSCI_STAT_COUNT_AARCH32:
- SMC_RET1(handle, psci_stat_count(x1, x2));
+ return psci_stat_count(x1, x2);
#endif
default:
switch (smc_fid) {
case PSCI_CPU_SUSPEND_AARCH64:
- SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
+ return psci_cpu_suspend(x1, x2, x3);
case PSCI_CPU_ON_AARCH64:
- SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+ return psci_cpu_on(x1, x2, x3);
case PSCI_AFFINITY_INFO_AARCH64:
- SMC_RET1(handle, psci_affinity_info(x1, x2));
+ return psci_affinity_info(x1, x2);
case PSCI_MIG_AARCH64:
- SMC_RET1(handle, psci_migrate(x1));
+ return psci_migrate(x1);
case PSCI_MIG_INFO_UP_CPU_AARCH64:
- SMC_RET1(handle, psci_migrate_info_up_cpu());
+ return psci_migrate_info_up_cpu();
case PSCI_SYSTEM_SUSPEND_AARCH64:
- SMC_RET1(handle, psci_system_suspend(x1, x2));
+ return psci_system_suspend(x1, x2);
#if ENABLE_PSCI_STAT
case PSCI_STAT_RESIDENCY_AARCH64:
- SMC_RET1(handle, psci_stat_residency(x1, x2));
+ return psci_stat_residency(x1, x2);
case PSCI_STAT_COUNT_AARCH64:
- SMC_RET1(handle, psci_stat_count(x1, x2));
+ return psci_stat_count(x1, x2);
#endif
default:
}
WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
- SMC_RET1(handle, SMC_UNK);
+ return SMC_UNK;
}
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
-#include <bl31.h>
#include <debug.h>
#include <context_mgmt.h>
#include <platform.h>
-#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
* on have completed. Perform enough arch.initialization
* to run in the non-secure address space.
*/
- bl31_arch_setup();
+ psci_arch_setup();
/*
* Lock the CPU spin lock to make sure that the context initialization
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
-void psci_power_up_finish(void);
int psci_validate_entry_point(entry_point_info_t *ep,
uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
}
/*******************************************************************************
- * This function initializes the power domain topology tree by querying the
- * platform. The power domain nodes higher than the CPU are populated in the
- * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
- * psci_cpu_pd_nodes[]. The platform exports its static topology map through the
+ * This function does the architectural setup and takes the warm boot
+ * entrypoint `mailbox_ep` as an argument. The function also initializes the
+ * power domain topology tree by querying the platform. The power domain nodes
+ * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
+ * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
+ * exports its static topology map through the
* populate_power_domain_topology_tree() API. The algorithm populates the
* psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
- * topology map. On a platform that implements two clusters of 2 cpus each, and
- * supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look
- * like this:
+ * topology map. On a platform that implements two clusters of 2 cpus each
+ * and supports 3 domain levels, the populated psci_non_cpu_pd_nodes would
+ * look like this:
*
* ---------------------------------------------------
* | system node | cluster 0 node | cluster 1 node |
* | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
* ------------------------------------------------
******************************************************************************/
-int psci_setup(void)
+int psci_setup(uintptr_t mailbox_ep)
{
const unsigned char *topology_tree;
+	/* Do the architectural initialization */
+ psci_arch_setup();
+
/* Query the topology map from the platform */
topology_tree = plat_get_power_domain_tree_desc();
*/
psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
- plat_setup_psci_ops((uintptr_t)psci_entrypoint,
- &psci_plat_pm_ops);
+ assert(mailbox_ep);
+ plat_setup_psci_ops(mailbox_ep, &psci_plat_pm_ops);
assert(psci_plat_pm_ops);
/* Initialize the psci capability */
return 0;
}
+
+/*******************************************************************************
+ * This duplicates what the primary cpu did after a cold boot in BL1. The same
+ * needs to be done when a cpu is hotplugged in. This function could also over-
+ * ride any EL3 setup done by BL1 as this code resides in rw memory.
+ ******************************************************************************/
+void psci_arch_setup(void)
+{
+ /* Program the counter frequency */
+ write_cntfrq_el0(plat_get_syscnt_freq2());
+
+ /* Initialize the cpu_ops pointer. */
+ init_cpu_ops();
+}
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
-#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
#include <debug.h>
#include <psci.h>
#include <runtime_svc.h>
+#include <smcc_helpers.h>
#include <std_svc.h>
#include <stdint.h>
#include <uuid.h>
0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2);
-/* Setup Standard Services */
-static int32_t std_svc_setup(void)
-{
- /*
- * PSCI is the only specification implemented as a Standard Service.
- * Invoke PSCI setup from here
- */
- return psci_setup();
-}
-
/*
* Top-level Standard Service SMC handler. This handler will in turn dispatch
* calls to PSCI SMC handler
* value
*/
if (is_psci_fid(smc_fid)) {
- return psci_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
- handle, flags);
+ SMC_RET1(handle,
+ psci_smc_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags));
}
switch (smc_fid) {
OEN_STD_START,
OEN_STD_END,
SMC_TYPE_FAST,
- std_svc_setup,
+ NULL,
std_svc_smc_handler
);